}
p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
- wait_on_xen_event_channel(v->arch.hvm.xen_port,
+ wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port,
p->state != STATE_IOREQ_READY &&
p->state != STATE_IOREQ_INPROCESS);
- if ( p->state == STATE_IORESP_READY )
+ switch ( p->state )
+ {
+ case STATE_IORESP_READY:
hvm_io_assist(v);
- if ( p->state != STATE_INVALID ) {
+ break;
+ case STATE_INVALID:
+ break;
+ default:
printf("Weird HVM iorequest state %d.\n", p->state);
domain_crash(v->domain);
}
domain_crash(v->domain);
return;
}
- wmb();
+
+ prepare_wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port);
p->state = STATE_IOREQ_READY;
notify_via_xen_event_channel(v->arch.hvm_vcpu.xen_port);
}
ENTRY(svm_asm_do_resume)
svm_test_all_events:
GET_CURRENT(%ebx)
- pushl %ebx
- call hvm_do_resume
- addl $4, %esp
/*test_all_events:*/
xorl %ecx,%ecx
notl %ecx
ENTRY(svm_asm_do_resume)
svm_test_all_events:
GET_CURRENT(%rbx)
- movq %rbx, %rdi
- call hvm_do_resume
/*test_all_events:*/
cli # tests must not race interrupts
/*test_softirqs:*/
ALIGN
ENTRY(vmx_asm_do_vmentry)
GET_CURRENT(%ebx)
- pushl %ebx
- call hvm_do_resume
- addl $4, %esp
cli # tests must not race interrupts
movl VCPU_processor(%ebx),%eax
ALIGN
ENTRY(vmx_asm_do_vmentry)
GET_CURRENT(%rbx)
- movq %rbx, %rdi
- call hvm_do_resume
cli # tests must not race interrupts
movl VCPU_processor(%rbx),%eax
do_softirq(); \
} while ( 0 )
+#define prepare_wait_on_xen_event_channel(port) \
+ do { \
+ set_bit(_VCPUF_blocked_in_xen, &current->vcpu_flags); \
+ raise_softirq(SCHEDULE_SOFTIRQ); \
+ mb(); /* set blocked status /then/ caller does his work */ \
+ } while ( 0 )
+
#endif /* __XEN_EVENT_H__ */